import numpy as np
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import pandas as pd
from tensorflow import keras
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot
import plotly.graph_objects as go
import math
import seaborn as sns
from sklearn.metrics import mean_squared_error
np.random.seed(1)
tf.random.set_seed(1)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, GRU, Dropout, RepeatVector, TimeDistributed
from keras import backend
# Hyper-parameters / experiment configuration for the LSTM anomaly-detection run.
MODELFILENAME = 'MODELS/LSTM_6h_TFM_2c'  # output path (without extension) used by model.save below
TIME_STEPS=36 #6h — sliding-window length in samples (presumably 10-min sampling; TODO confirm)
CMODEL = LSTM  # recurrent cell class; swap for GRU to change the architecture
UNITS=45  # hidden units per recurrent layer
DROPOUT1=0.118  # dropout rate after the first recurrent layer
DROPOUT2=0.243  # dropout rate after the second recurrent layer
ACTIVATION='tanh'  # activation of the final TimeDistributed Dense(1) head
OPTIMIZER='adamax'  # Keras optimizer name passed to model.compile
EPOCHS=43  # maximum training epochs (EarlyStopping may stop sooner)
BATCHSIZE=30  # mini-batch size for model.fit
VALIDATIONSPLIT=0.2  # fraction of training data held out for validation
# Code to read csv file into Colaboratory:
# from google.colab import files
# uploaded = files.upload()
# import io
# df = pd.read_csv(io.BytesIO(uploaded['SentDATA.csv']))
# Dataset is now stored in a Pandas Dataframe
# Load the TFM dataset, index it by timestamp, normalise column names and
# cast the pollutant columns to float32 (Keras works in float32).
df = pd.read_csv('../../data/dadesTFM.csv')
df.reset_index(inplace=True)
df['Time'] = pd.to_datetime(df['Time'])  # parse timestamps so they can serve as the index
df = df.set_index('Time')

# Canonical (space-free) column names used throughout the rest of the script.
columns = ['PM1', 'PM25', 'PM10', 'PM1ATM', 'PM25ATM', 'PM10ATM']

# The original code renamed the columns and then re-assigned each one from the
# raw frame again; a single rename followed by an astype loop does the same.
df1 = df.rename(columns={"PM 1": "PM1", "PM 2.5": "PM25", "PM 10": "PM10",
                         "PM 1 ATM": "PM1ATM", "PM 2.5 ATM": "PM25ATM",
                         "PM 10 ATM": "PM10ATM"})
for _col in columns:
    df1[_col] = df1[_col].astype(np.float32)

df2 = df1.copy()

# Chronological 80/20 train/test split (no shuffling: this is a time series).
train_size = int(len(df2) * 0.8)
test_size = len(df2) - train_size
# .copy() detaches the splits from df2 so later column assignments do not
# raise pandas' SettingWithCopyWarning (the original run logged that warning).
train = df2.iloc[:train_size].copy()
test = df2.iloc[train_size:].copy()
# Shapes in the recorded run: train (3117, 7), test (780, 7).
((3117, 7), (780, 7))
#Standardize the data
# Fit one StandardScaler per pollutant column on the TRAINING split only and
# keep every fitted scaler, so the identical transform can be reused later.
# NOTE(review): further down, this file refits brand-new scalers on the TEST
# split, letting test statistics leak into preprocessing; reusing these
# train-fitted scalers would be the standard leak-free protocol — confirm.
train = train.copy()  # real copy: assigning columns to a slice view raised SettingWithCopyWarning in the original run
train_scalers = {}
for col in columns:
    scaler = StandardScaler()
    train[col] = scaler.fit_transform(train[[col]])
    train_scalers[col] = scaler
<ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]]) <ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]]) <ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]]) <ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]]) <ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]]) <ipython-input-6-83cecdbc25f8>:4: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. 
Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy train[col] = scaler.fit_transform(train[[col]])
def create_sequences(X, y, time_steps=TIME_STEPS):
    """Window X/y into supervised sequences.

    Each sample is a `time_steps`-long slice of X (as a 2-D array of values)
    and its target is the y value immediately after the window.
    Returns (np.array of windows, np.array of targets).
    """
    windows, targets = [], []
    last_start = len(X) - time_steps
    for start in range(last_start):
        stop = start + time_steps
        windows.append(X.iloc[start:stop].values)
        targets.append(y.iloc[stop])
    return np.array(windows), np.array(targets)
# Build supervised sequences for PM2.5 (columns[1]): each sample is a
# TIME_STEPS-long window of scaled PM25 values; the target is the next value.
X_train, y_train = create_sequences(train[[columns[1]]], train[columns[1]])
#X_test, y_test = create_sequences(test[[columns[1]]], test[columns[1]])
print(f'X_train shape: {X_train.shape}')
print(f'y_train shape: {y_train.shape}')
X_train shape: (3081, 36, 1) y_train shape: (3081,)
# Add an extra metric: root-mean-squared error for the Keras training logs.
def rmse(y_true, y_pred):
    """Root-mean-squared error over the last axis, using Keras backend ops."""
    squared_error = backend.square(y_true - y_pred)
    return backend.sqrt(backend.mean(squared_error, axis=-1))
# Stacked recurrent model: two CMODEL (LSTM) layers with dropout, topped by a
# TimeDistributed Dense(1) head, so the network emits one value per time step
# — output shape (batch, TIME_STEPS, 1) per the recorded model.summary().
model = Sequential()
model.add(CMODEL(units = UNITS, return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(rate=DROPOUT1))
model.add(CMODEL(units = UNITS, return_sequences=True))
model.add(Dropout(rate=DROPOUT2))
# NOTE(review): the model outputs a whole sequence (None, 36, 1) while
# y_train holds one scalar per sample (shape (N,)); Keras broadcasts the
# target across the 36 steps when computing the loss — confirm intentional.
model.add(TimeDistributed(Dense(1,kernel_initializer='normal',activation=ACTIVATION)))
# MAE is the training loss; MSE and the custom rmse metric are tracked too.
model.compile(optimizer=OPTIMIZER, loss='mae',metrics=['mse',rmse])
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= lstm (LSTM) (None, 36, 45) 8460 _________________________________________________________________ dropout (Dropout) (None, 36, 45) 0 _________________________________________________________________ lstm_1 (LSTM) (None, 36, 45) 16380 _________________________________________________________________ dropout_1 (Dropout) (None, 36, 45) 0 _________________________________________________________________ time_distributed (TimeDistri (None, 36, 1) 46 ================================================================= Total params: 24,886 Trainable params: 24,886 Non-trainable params: 0 _________________________________________________________________
# Train without shuffling (time-series order matters) and stop early once
# validation loss has not improved for 5 consecutive epochs.
history = model.fit(X_train, y_train, epochs=EPOCHS, batch_size=BATCHSIZE, validation_split=VALIDATIONSPLIT,
                    callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, mode='min')], shuffle=False)
Epoch 1/43 83/83 [==============================] - 3s 34ms/step - loss: 0.6741 - mse: 0.8155 - rmse: 0.6912 - val_loss: 0.4614 - val_mse: 0.4068 - val_rmse: 0.5076 Epoch 2/43 83/83 [==============================] - 2s 22ms/step - loss: 0.5772 - mse: 0.6390 - rmse: 0.6185 - val_loss: 0.3982 - val_mse: 0.3589 - val_rmse: 0.4572 Epoch 3/43 83/83 [==============================] - 2s 23ms/step - loss: 0.5560 - mse: 0.6103 - rmse: 0.6024 - val_loss: 0.3712 - val_mse: 0.3388 - val_rmse: 0.4326 Epoch 4/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5444 - mse: 0.5930 - rmse: 0.5942 - val_loss: 0.3564 - val_mse: 0.3275 - val_rmse: 0.4177 Epoch 5/43 83/83 [==============================] - 2s 23ms/step - loss: 0.5361 - mse: 0.5813 - rmse: 0.5879 - val_loss: 0.3453 - val_mse: 0.3193 - val_rmse: 0.4061 Epoch 6/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5303 - mse: 0.5726 - rmse: 0.5827 - val_loss: 0.3366 - val_mse: 0.3130 - val_rmse: 0.3968 Epoch 7/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5249 - mse: 0.5656 - rmse: 0.5778 - val_loss: 0.3304 - val_mse: 0.3085 - val_rmse: 0.3896 Epoch 8/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5206 - mse: 0.5595 - rmse: 0.5736 - val_loss: 0.3252 - val_mse: 0.3047 - val_rmse: 0.3833 Epoch 9/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5170 - mse: 0.5547 - rmse: 0.5701 - val_loss: 0.3208 - val_mse: 0.3016 - val_rmse: 0.3780 Epoch 10/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5144 - mse: 0.5513 - rmse: 0.5676 - val_loss: 0.3157 - val_mse: 0.2988 - val_rmse: 0.3726 Epoch 11/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5115 - mse: 0.5476 - rmse: 0.5647 - val_loss: 0.3135 - val_mse: 0.2970 - val_rmse: 0.3693 Epoch 12/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5093 - mse: 0.5448 - rmse: 0.5625 - val_loss: 0.3106 - val_mse: 0.2953 - val_rmse: 0.3658 Epoch 13/43 
83/83 [==============================] - 2s 22ms/step - loss: 0.5071 - mse: 0.5422 - rmse: 0.5604 - val_loss: 0.3075 - val_mse: 0.2935 - val_rmse: 0.3622 Epoch 14/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5057 - mse: 0.5401 - rmse: 0.5587 - val_loss: 0.3056 - val_mse: 0.2924 - val_rmse: 0.3596 Epoch 15/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5037 - mse: 0.5377 - rmse: 0.5567 - val_loss: 0.3044 - val_mse: 0.2914 - val_rmse: 0.3575 Epoch 16/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5025 - mse: 0.5360 - rmse: 0.5552 - val_loss: 0.3025 - val_mse: 0.2904 - val_rmse: 0.3550 Epoch 17/43 83/83 [==============================] - 2s 21ms/step - loss: 0.5012 - mse: 0.5344 - rmse: 0.5538 - val_loss: 0.3008 - val_mse: 0.2894 - val_rmse: 0.3527 Epoch 18/43 83/83 [==============================] - 2s 22ms/step - loss: 0.4999 - mse: 0.5330 - rmse: 0.5523 - val_loss: 0.2998 - val_mse: 0.2888 - val_rmse: 0.3510 Epoch 19/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4990 - mse: 0.5318 - rmse: 0.5512 - val_loss: 0.2985 - val_mse: 0.2881 - val_rmse: 0.3491 Epoch 20/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4976 - mse: 0.5301 - rmse: 0.5498 - val_loss: 0.2980 - val_mse: 0.2877 - val_rmse: 0.3478 Epoch 21/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4967 - mse: 0.5287 - rmse: 0.5488 - val_loss: 0.2967 - val_mse: 0.2870 - val_rmse: 0.3459 Epoch 22/43 83/83 [==============================] - 2s 22ms/step - loss: 0.4957 - mse: 0.5280 - rmse: 0.5476 - val_loss: 0.2961 - val_mse: 0.2865 - val_rmse: 0.3446 Epoch 23/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4952 - mse: 0.5271 - rmse: 0.5468 - val_loss: 0.2953 - val_mse: 0.2861 - val_rmse: 0.3432 Epoch 24/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4941 - mse: 0.5260 - rmse: 0.5458 - val_loss: 0.2944 - val_mse: 0.2857 - val_rmse: 0.3419 Epoch 25/43 83/83 
[==============================] - 2s 21ms/step - loss: 0.4936 - mse: 0.5251 - rmse: 0.5452 - val_loss: 0.2937 - val_mse: 0.2852 - val_rmse: 0.3405 Epoch 26/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4928 - mse: 0.5241 - rmse: 0.5444 - val_loss: 0.2931 - val_mse: 0.2849 - val_rmse: 0.3395 Epoch 27/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4923 - mse: 0.5233 - rmse: 0.5436 - val_loss: 0.2926 - val_mse: 0.2845 - val_rmse: 0.3383 Epoch 28/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4919 - mse: 0.5232 - rmse: 0.5432 - val_loss: 0.2916 - val_mse: 0.2840 - val_rmse: 0.3369 Epoch 29/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4910 - mse: 0.5221 - rmse: 0.5424 - val_loss: 0.2917 - val_mse: 0.2837 - val_rmse: 0.3363 Epoch 30/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4908 - mse: 0.5216 - rmse: 0.5422 - val_loss: 0.2908 - val_mse: 0.2833 - val_rmse: 0.3350 Epoch 31/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4904 - mse: 0.5213 - rmse: 0.5415 - val_loss: 0.2900 - val_mse: 0.2829 - val_rmse: 0.3338 Epoch 32/43 83/83 [==============================] - 2s 22ms/step - loss: 0.4900 - mse: 0.5203 - rmse: 0.5413 - val_loss: 0.2897 - val_mse: 0.2827 - val_rmse: 0.3331 Epoch 33/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4895 - mse: 0.5205 - rmse: 0.5409 - val_loss: 0.2896 - val_mse: 0.2826 - val_rmse: 0.3325 Epoch 34/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4893 - mse: 0.5198 - rmse: 0.5403 - val_loss: 0.2890 - val_mse: 0.2822 - val_rmse: 0.3315 Epoch 35/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4887 - mse: 0.5192 - rmse: 0.5399 - val_loss: 0.2884 - val_mse: 0.2820 - val_rmse: 0.3306 Epoch 36/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4881 - mse: 0.5186 - rmse: 0.5392 - val_loss: 0.2879 - val_mse: 0.2817 - val_rmse: 0.3297 Epoch 37/43 83/83 
[==============================] - 2s 22ms/step - loss: 0.4878 - mse: 0.5180 - rmse: 0.5389 - val_loss: 0.2874 - val_mse: 0.2814 - val_rmse: 0.3289 Epoch 38/43 83/83 [==============================] - 2s 24ms/step - loss: 0.4876 - mse: 0.5179 - rmse: 0.5389 - val_loss: 0.2870 - val_mse: 0.2812 - val_rmse: 0.3282 Epoch 39/43 83/83 [==============================] - 2s 24ms/step - loss: 0.4872 - mse: 0.5177 - rmse: 0.5383 - val_loss: 0.2871 - val_mse: 0.2810 - val_rmse: 0.3278 Epoch 40/43 83/83 [==============================] - 2s 22ms/step - loss: 0.4874 - mse: 0.5177 - rmse: 0.5384 - val_loss: 0.2867 - val_mse: 0.2809 - val_rmse: 0.3272 Epoch 41/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4868 - mse: 0.5167 - rmse: 0.5376 - val_loss: 0.2865 - val_mse: 0.2808 - val_rmse: 0.3267 Epoch 42/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4865 - mse: 0.5169 - rmse: 0.5374 - val_loss: 0.2861 - val_mse: 0.2805 - val_rmse: 0.3259 Epoch 43/43 83/83 [==============================] - 2s 21ms/step - loss: 0.4861 - mse: 0.5163 - rmse: 0.5371 - val_loss: 0.2857 - val_mse: 0.2805 - val_rmse: 0.3254
import matplotlib.pyplot as plt

# Plot every tracked metric (training and validation curves) from the fit
# history on a single axis.
for metric, label in (
    ('loss', 'MAE Training loss'),
    ('val_loss', 'MAE Validation loss'),
    ('mse', 'MSE Training loss'),
    ('val_mse', 'MSE Validation loss'),
    ('rmse', 'RMSE Training loss'),
    ('val_rmse', 'RMSE Validation loss'),
):
    plt.plot(history.history[metric], label=label)
plt.legend();
# Distribution of the per-sample training reconstruction error.
X_train_pred = model.predict(X_train, verbose=0)
# NOTE(review): this compares the model's per-step outputs against the INPUT
# window (autoencoder-style reconstruction), not against y_train — confirm
# that is the intended anomaly score.
train_mae_loss = np.mean(np.abs(X_train_pred - X_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel('Train MAE loss')
plt.ylabel('Number of Samples');
def evaluate_prediction(predictions, actual, model_name):
    """Print and return (MAE, RMSE, MSE) of `predictions` vs. `actual`."""
    diff = predictions - actual
    mae = np.abs(diff).mean()
    mse = np.square(diff).mean()
    rmse = np.sqrt(mse)
    print(model_name + ':')
    print('Mean Absolute Error: {:.4f}'.format(mae))
    print('Root Mean Square Error: {:.4f}'.format(rmse))
    print('Mean Square Error: {:.4f}'.format(mse))
    print('')
    return mae, rmse, mse
mae,rmse,mse = evaluate_prediction(X_train_pred, X_train,"LSTM")
LSTM: Mean Absolute Error: 0.2345 Root Mean Square Error: 0.4646 Mean Square Error: 0.2159
model.save(MODELFILENAME+'.h5')
# Compute the anomaly threshold from the test reconstruction error.
def calculate_threshold(X_test, X_test_pred, cutoff=0.95):
    """Return the anomaly threshold for a batch of reconstructions.

    The per-sample RMSE between `X_test_pred` and `X_test` is computed
    (mean over axis 1), the distances are sorted ascending, and the value at
    the `cutoff` fraction of the sorted array is returned. The original
    docstring claimed a 0.80 cutoff while the code used 0.95; the cutoff is
    now an explicit parameter defaulting to the actual 0.95 behaviour.
    """
    distance = np.sqrt(np.mean(np.square(X_test_pred - X_test), axis=1))
    distance.sort()  # in-place ascending sort of the per-sample distances
    cut_off = int(cutoff * len(distance))
    return distance[cut_off]
# Per-column anomaly detection on the test split: scale, window, evaluate the
# model, threshold the reconstruction error and plot the detected anomalies.
for col in columns:
    print ("####################### "+col +" ###########################")
    #Standardize the test data
    # NOTE(review): a NEW scaler is fitted on the *test* data here instead of
    # reusing the scaler fitted on the training split — confirm this is
    # intended (reusing train statistics is the usual leak-free protocol).
    scaler = StandardScaler()
    test_cpy = test.copy()  # untouched copy of the frame (kept, but unused below)
    test[col] = scaler.fit_transform(test[[col]])
    # Build windowed sequences for this column of the test data.
    X_test1, y_test1 = create_sequences(test[[col]], test[col])
    print(f'Testing shape: {X_test1.shape}')
    # Evaluate the trained model (returns [loss, mse, rmse]).
    eval = model.evaluate(X_test1, y_test1)
    print("evaluate: ",eval)
    # Predict with the model and report MAE/RMSE/MSE against the input windows.
    X_test1_pred = model.predict(X_test1, verbose=0)
    evaluate_prediction(X_test1_pred, X_test1,"LSTM")
    # Per-sample MAE / RMSE reconstruction losses.
    test1_mae_loss = np.mean(np.abs(X_test1_pred - X_test1), axis=1)
    test1_rmse_loss = np.sqrt(np.mean(np.square(X_test1_pred - X_test1),axis=1))
    # Reshape prediction to 2-D (samples*steps, features) for thresholding.
    X_test1_predReshape = X_test1_pred.reshape((X_test1_pred.shape[0] * X_test1_pred.shape[1]), X_test1_pred.shape[2])
    # Reshape test data the same way.
    X_test1Reshape = X_test1.reshape((X_test1.shape[0] * X_test1.shape[1]), X_test1.shape[2])
    threshold_test = calculate_threshold(X_test1Reshape,X_test1_predReshape)
    # Score frame: skip the first TIME_STEPS rows (they have no full window).
    test1_score_df = pd.DataFrame(test[TIME_STEPS:])
    test1_score_df['loss'] = test1_rmse_loss.reshape((-1))
    test1_score_df['threshold'] = threshold_test
    test1_score_df['anomaly'] = test1_score_df['loss'] > test1_score_df['threshold']
    test1_score_df[col] = test[TIME_STEPS:][col]
    # Plot the test loss against the threshold.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['loss'], name='Test loss'))
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=test1_score_df['threshold'], name='Threshold'))
    fig.update_layout(showlegend=True, title='Test loss vs. Threshold')
    fig.show()
    # Collect the rows flagged as anomalies.
    anomalies1 = test1_score_df.loc[test1_score_df['anomaly'] == True]
    anomalies1.shape
    print('anomalies: ',anomalies1.shape); print();
    # Plot the inverse-transformed series plus the detected anomalies, to
    # verify that the normalisation applied above does not distort the data.
    fig = go.Figure()
    fig.add_trace(go.Scatter(x=test1_score_df.index, y=scaler.inverse_transform(test1_score_df[col]), name=col))
    fig.add_trace(go.Scatter(x=anomalies1.index, y=scaler.inverse_transform(anomalies1[col]), mode='markers', name='Anomaly'))
    fig.update_layout(showlegend=True, title='Detected anomalies')
    fig.show()
    print ("######################################################")
print ("######################################################")
####################### PM1 ########################### Testing shape: (744, 36, 1) 9/24 [==========>...................] - ETA: 0s - loss: 0.4150 - mse: 0.8601 - rmse: 0.4919
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy test[col] = scaler.fit_transform(test[[col]])
24/24 [==============================] - 0s 7ms/step - loss: 0.5546 - mse: 0.9597 - rmse: 0.6344 evaluate: [0.5545950531959534, 0.9596891403198242, 0.6343836784362793] LSTM: Mean Absolute Error: 0.2327 Root Mean Square Error: 0.6046 Mean Square Error: 0.3656
anomalies: (61, 10)
###################################################### ####################### PM25 ########################### Testing shape: (744, 36, 1) 9/24 [==========>...................] - ETA: 0s - loss: 0.4204 - mse: 0.6471 - rmse: 0.4923
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
24/24 [==============================] - 0s 6ms/step - loss: 0.5733 - mse: 0.9198 - rmse: 0.6523 evaluate: [0.573311984539032, 0.919805109500885, 0.6522879004478455] LSTM: Mean Absolute Error: 0.2452 Root Mean Square Error: 0.5608 Mean Square Error: 0.3145
anomalies: (95, 10)
###################################################### ####################### PM10 ########################### Testing shape: (744, 36, 1) 1/24 [>.............................] - ETA: 0s - loss: 0.5217 - mse: 0.7754 - rmse: 0.68
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
24/24 [==============================] - 0s 6ms/step - loss: 0.5860 - mse: 0.9032 - rmse: 0.6666 evaluate: [0.5859569311141968, 0.9031836986541748, 0.6665500402450562] LSTM: Mean Absolute Error: 0.2520 Root Mean Square Error: 0.5229 Mean Square Error: 0.2734
anomalies: (57, 10)
###################################################### ####################### PM1ATM ########################### Testing shape: (744, 36, 1) 9/24 [==========>...................] - ETA: 0s - loss: 0.4614 - mse: 0.7991 - rmse: 0.5462
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
24/24 [==============================] - 0s 6ms/step - loss: 0.5964 - mse: 0.9281 - rmse: 0.6821 evaluate: [0.5963577628135681, 0.9280717372894287, 0.6821048855781555] LSTM: Mean Absolute Error: 0.2517 Root Mean Square Error: 0.5341 Mean Square Error: 0.2853
anomalies: (61, 10)
###################################################### ####################### PM25ATM ########################### Testing shape: (744, 36, 1) 9/24 [==========>...................] - ETA: 0s - loss: 0.4591 - mse: 0.8295 - rmse: 0.5429
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
24/24 [==============================] - 0s 6ms/step - loss: 0.5912 - mse: 0.9310 - rmse: 0.6761 evaluate: [0.5911722779273987, 0.9310407638549805, 0.6761285662651062] LSTM: Mean Absolute Error: 0.2490 Root Mean Square Error: 0.5444 Mean Square Error: 0.2964
anomalies: (61, 10)
###################################################### ####################### PM10ATM ########################### Testing shape: (744, 36, 1) 9/24 [==========>...................] - ETA: 0s - loss: 0.4347 - mse: 0.7392 - rmse: 0.5072
<ipython-input-17-48420fb1aa44>:8: SettingWithCopyWarning: A value is trying to be set on a copy of a slice from a DataFrame. Try using .loc[row_indexer,col_indexer] = value instead See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
24/24 [==============================] - 0s 6ms/step - loss: 0.5828 - mse: 0.9020 - rmse: 0.6621 evaluate: [0.5827909111976624, 0.9019755721092224, 0.6621447205543518] LSTM: Mean Absolute Error: 0.2516 Root Mean Square Error: 0.5461 Mean Square Error: 0.2982
anomalies: (61, 10)
######################################################